library(AppliedPredictiveModeling)
library(tidyverse)
library(caret)
library(mlbench)
library(naniar)

Friedman (1991) introduced several benchmark data sets created by simulation. One of these simulations used the following nonlinear equation to create data:
\[y = 10 \sin(\pi x_1 x_2) + 20(x_3 - 0.5)^2 + 10 x_4 + 5 x_5 + N(0, \sigma^2)\]
where the x values are random variables uniformly distributed between [0, 1] (there are also 5 other non-informative variables also created in the simulation). The package mlbench contains a function called mlbench.friedman1 that simulates these data:
# Simulate the Friedman 1 benchmark: 200 training cases with noise sd = 1.
# The simulation produces 10 uniform predictors; only X1-X5 are informative.
set.seed(200)
trainingData <- mlbench.friedman1(200, sd=1)
# Convert the predictor matrix to a data frame so caret::train accepts it.
trainingData$x <- data.frame(trainingData$x)
# featurePlot
featurePlot(trainingData$x, trainingData$y)
glimpse(trainingData$x)
## Rows: 200
## Columns: 10
## $ X1 <dbl> 0.53377245, 0.58376503, 0.58957830, 0.69103989, 0.66733150, 0.8392…
## $ X2 <dbl> 0.64780643, 0.43815276, 0.58790649, 0.22595475, 0.81889851, 0.3862…
## $ X3 <dbl> 0.85078526, 0.67272659, 0.40967108, 0.03335447, 0.71676079, 0.6461…
## $ X4 <dbl> 0.181599574, 0.669249143, 0.338127280, 0.066912736, 0.803242873, 0…
## $ X5 <dbl> 0.929039760, 0.163797838, 0.894093335, 0.637445191, 0.083068641, 0…
## $ X6 <dbl> 0.36179060, 0.45305931, 0.02681911, 0.52500637, 0.22344157, 0.4370…
## $ X7 <dbl> 0.826660859, 0.648960076, 0.178561450, 0.513361395, 0.664490604, 0…
## $ X8 <dbl> 0.42140806, 0.84462393, 0.34959078, 0.79702598, 0.90389194, 0.6489…
## $ X9 <dbl> 0.59111440, 0.92819306, 0.01759542, 0.68986918, 0.39696995, 0.5311…
## $ X10 <dbl> 0.588621560, 0.758400814, 0.444118458, 0.445071622, 0.550080800, 0…
# Large test set (5000 cases) to estimate true out-of-sample performance.
# NOTE(review): no fresh set.seed here, so this draw depends on the RNG
# state left over from the training-set simulation above.
testData <- mlbench.friedman1(5000, sd=1)
testData$x <- data.frame(testData$x)
glimpse(testData)
## List of 2
## $ x:'data.frame': 5000 obs. of 10 variables:
## ..$ X1 : num [1:5000] 0.4958 0.4078 0.4991 0.1956 0.0228 ...
## ..$ X2 : num [1:5000] 0.261 0.716 0.715 0.369 0.746 ...
## ..$ X3 : num [1:5000] 0.81 0.964 0.681 0.378 0.391 ...
## ..$ X4 : num [1:5000] 0.82318 0.50565 0.00384 0.38569 0.87398 ...
## ..$ X5 : num [1:5000] 0.822 0.88 0.498 0.279 0.197 ...
## ..$ X6 : num [1:5000] 0.3219 0.5745 0.0603 0.5547 0.1762 ...
## ..$ X7 : num [1:5000] 0.0544 0.4552 0.8926 0.3972 0.5067 ...
## ..$ X8 : num [1:5000] 0.519 0.981 0.975 0.84 0.556 ...
## ..$ X9 : num [1:5000] 0.3914 0.6663 0.0856 0.0904 0.379 ...
## ..$ X10: num [1:5000] 0.73894 0.00059 0.59221 0.16227 0.65009 ...
## $ y: num [1:5000] 17.52 20.87 12.82 5.09 10.79 ...
Tune several models on these data.
# k-nearest-neighbors regression on the simulated Friedman data.
# Predictors are centered and scaled; k is tuned over 20 candidate
# values using 10-fold cross-validation.
set.seed(317)
knnfit <- train(
  x = trainingData$x,
  y = trainingData$y,
  method = "knn",
  preProcess = c("center", "scale"),
  tuneLength = 20,
  trControl = trainControl(method = "cv")
)
knnfit## k-Nearest Neighbors
##
## 200 samples
## 10 predictor
##
## Pre-processing: centered (10), scaled (10)
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 180, 180, 180, 180, 180, 180, ...
## Resampling results across tuning parameters:
##
## k RMSE Rsquared MAE
## 5 3.129963 0.6307340 2.630432
## 7 3.014544 0.6725219 2.474808
## 9 3.009891 0.6866916 2.436532
## 11 3.041647 0.6922912 2.469379
## 13 3.021349 0.7218794 2.453776
## 15 3.048021 0.7287693 2.472147
## 17 3.078646 0.7320769 2.503486
## 19 3.082277 0.7434342 2.505638
## 21 3.135492 0.7305293 2.567035
## 23 3.171086 0.7317535 2.603795
## 25 3.162112 0.7447415 2.602762
## 27 3.228442 0.7314150 2.656904
## 29 3.250834 0.7278217 2.675701
## 31 3.282933 0.7267271 2.688565
## 33 3.290970 0.7350442 2.698592
## 35 3.322869 0.7305981 2.717600
## 37 3.349474 0.7317697 2.717001
## 39 3.371359 0.7308501 2.737718
## 41 3.392752 0.7396462 2.756124
## 43 3.422955 0.7437136 2.784268
##
## RMSE was used to select the optimal model using the smallest value.
## The final value used for the model was k = 9.
knnfit$bestTune## k
## 3 9
plot(knnfit)
plot(varImp(knnfit))
data.frame(Rsquared=knnfit[["results"]][["Rsquared"]][as.numeric(rownames(knnfit$bestTune))],
           RMSE=knnfit[["results"]][["RMSE"]][as.numeric(rownames(knnfit$bestTune))])
## Rsquared RMSE
## 1 0.6866916 3.009891
# Radial-basis SVM regression on the simulated Friedman data.
# caret tunes the cost C over 20 values (sigma is estimated analytically);
# resampling is 10-fold cross-validation on centered/scaled predictors.
set.seed(317)
svmfit <- train(
  x = trainingData$x,
  y = trainingData$y,
  method = "svmRadial",
  preProcess = c("center", "scale"),
  tuneLength = 20,
  trControl = trainControl(method = "cv")
)
svmfit## Support Vector Machines with Radial Basis Function Kernel
##
## 200 samples
## 10 predictor
##
## Pre-processing: centered (10), scaled (10)
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 180, 180, 180, 180, 180, 180, ...
## Resampling results across tuning parameters:
##
## C RMSE Rsquared MAE
## 0.25 2.482921 0.8041684 1.987997
## 0.50 2.225629 0.8199832 1.770148
## 1.00 2.050095 0.8408862 1.642206
## 2.00 1.951377 0.8548928 1.553396
## 4.00 1.887021 0.8632458 1.502009
## 8.00 1.855350 0.8658251 1.469802
## 16.00 1.855273 0.8652878 1.471794
## 32.00 1.855180 0.8652888 1.471609
## 64.00 1.855180 0.8652888 1.471609
## 128.00 1.855180 0.8652888 1.471609
## 256.00 1.855180 0.8652888 1.471609
## 512.00 1.855180 0.8652888 1.471609
## 1024.00 1.855180 0.8652888 1.471609
## 2048.00 1.855180 0.8652888 1.471609
## 4096.00 1.855180 0.8652888 1.471609
## 8192.00 1.855180 0.8652888 1.471609
## 16384.00 1.855180 0.8652888 1.471609
## 32768.00 1.855180 0.8652888 1.471609
## 65536.00 1.855180 0.8652888 1.471609
## 131072.00 1.855180 0.8652888 1.471609
##
## Tuning parameter 'sigma' was held constant at a value of 0.06295544
## RMSE was used to select the optimal model using the smallest value.
## The final values used for the model were sigma = 0.06295544 and C = 32.
svmfit$finalModel## Support Vector Machine object of class "ksvm"
##
## SV type: eps-svr (regression)
## parameter : epsilon = 0.1 cost C = 32
##
## Gaussian Radial Basis kernel function.
## Hyperparameter : sigma = 0.062955443796397
##
## Number of Support Vectors : 152
##
## Objective Function Value : -73.5893
## Training error : 0.0085
plot(svmfit)
plot(varImp(svmfit))
data.frame(Rsquared=svmfit[["results"]][["Rsquared"]][as.numeric(rownames(svmfit$bestTune))],
           RMSE=svmfit[["results"]][["RMSE"]][as.numeric(rownames(svmfit$bestTune))])
## Rsquared RMSE
## 1 0.8652888 1.85518
# MARS (earth) model tuned over every combination of interaction degree
# (1-2) and number of retained terms (2-38), with 10-fold cross-validation.
set.seed(317)
marsGrid <- expand.grid(.degree = 1:2, .nprune = 2:38)
marsfit <- train(
  x = trainingData$x,
  y = trainingData$y,
  method = "earth",
  preProcess = c("center", "scale"),
  tuneGrid = marsGrid,
  trControl = trainControl(method = "cv")
)
## Loading required package: earth
## Loading required package: Formula
## Loading required package: plotmo
## Loading required package: plotrix
## Loading required package: TeachingDemos
marsfit## Multivariate Adaptive Regression Spline
##
## 200 samples
## 10 predictor
##
## Pre-processing: centered (10), scaled (10)
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 180, 180, 180, 180, 180, 180, ...
## Resampling results across tuning parameters:
##
## degree nprune RMSE Rsquared MAE
## 1 2 4.425366 0.2190557 3.6620782
## 1 3 3.510669 0.5027292 2.8172393
## 1 4 2.659861 0.7244814 2.1491495
## 1 5 2.357542 0.7748479 1.8846523
## 1 6 2.267014 0.7950771 1.8032647
## 1 7 1.747556 0.8845023 1.3957204
## 1 8 1.742217 0.8839879 1.3446484
## 1 9 1.686370 0.8895096 1.2940316
## 1 10 1.611802 0.9000011 1.2485375
## 1 11 1.621181 0.8968899 1.2597303
## 1 12 1.608874 0.8973276 1.2577114
## 1 13 1.598875 0.8990619 1.2451770
## 1 14 1.600854 0.8985110 1.2482796
## 1 15 1.600854 0.8985110 1.2482796
## 1 16 1.600854 0.8985110 1.2482796
## 1 17 1.600854 0.8985110 1.2482796
## 1 18 1.600854 0.8985110 1.2482796
## 1 19 1.600854 0.8985110 1.2482796
## 1 20 1.600854 0.8985110 1.2482796
## 1 21 1.600854 0.8985110 1.2482796
## 1 22 1.600854 0.8985110 1.2482796
## 1 23 1.600854 0.8985110 1.2482796
## 1 24 1.600854 0.8985110 1.2482796
## 1 25 1.600854 0.8985110 1.2482796
## 1 26 1.600854 0.8985110 1.2482796
## 1 27 1.600854 0.8985110 1.2482796
## 1 28 1.600854 0.8985110 1.2482796
## 1 29 1.600854 0.8985110 1.2482796
## 1 30 1.600854 0.8985110 1.2482796
## 1 31 1.600854 0.8985110 1.2482796
## 1 32 1.600854 0.8985110 1.2482796
## 1 33 1.600854 0.8985110 1.2482796
## 1 34 1.600854 0.8985110 1.2482796
## 1 35 1.600854 0.8985110 1.2482796
## 1 36 1.600854 0.8985110 1.2482796
## 1 37 1.600854 0.8985110 1.2482796
## 1 38 1.600854 0.8985110 1.2482796
## 2 2 4.549565 0.1746915 3.7544582
## 2 3 3.615256 0.4741270 2.9301983
## 2 4 2.731108 0.7057270 2.1797808
## 2 5 2.361050 0.7739228 1.8736496
## 2 6 2.231880 0.8022071 1.7443082
## 2 7 1.932782 0.8498407 1.5459941
## 2 8 1.788846 0.8794599 1.3858674
## 2 9 1.623900 0.9014211 1.2410832
## 2 10 1.473741 0.9171042 1.1762413
## 2 11 1.432077 0.9268157 1.1481451
## 2 12 1.276945 0.9409982 1.0218556
## 2 13 1.235949 0.9430223 0.9945005
## 2 14 1.195378 0.9473300 0.9628314
## 2 15 1.199243 0.9471786 0.9611487
## 2 16 1.198156 0.9471995 0.9701514
## 2 17 1.198156 0.9471995 0.9701514
## 2 18 1.198156 0.9471995 0.9701514
## 2 19 1.198156 0.9471995 0.9701514
## 2 20 1.198156 0.9471995 0.9701514
## 2 21 1.198156 0.9471995 0.9701514
## 2 22 1.198156 0.9471995 0.9701514
## 2 23 1.198156 0.9471995 0.9701514
## 2 24 1.198156 0.9471995 0.9701514
## 2 25 1.198156 0.9471995 0.9701514
## 2 26 1.198156 0.9471995 0.9701514
## 2 27 1.198156 0.9471995 0.9701514
## 2 28 1.198156 0.9471995 0.9701514
## 2 29 1.198156 0.9471995 0.9701514
## 2 30 1.198156 0.9471995 0.9701514
## 2 31 1.198156 0.9471995 0.9701514
## 2 32 1.198156 0.9471995 0.9701514
## 2 33 1.198156 0.9471995 0.9701514
## 2 34 1.198156 0.9471995 0.9701514
## 2 35 1.198156 0.9471995 0.9701514
## 2 36 1.198156 0.9471995 0.9701514
## 2 37 1.198156 0.9471995 0.9701514
## 2 38 1.198156 0.9471995 0.9701514
##
## RMSE was used to select the optimal model using the smallest value.
## The final values used for the model were nprune = 14 and degree = 2.
marsfit$bestTune## nprune degree
## 50 14 2
plot(marsfit)
plot(varImp(marsfit))
data.frame(Rsquared=marsfit[["results"]][["Rsquared"]][as.numeric(rownames(marsfit$bestTune))],
           RMSE=marsfit[["results"]][["RMSE"]][as.numeric(rownames(marsfit$bestTune))])
## Rsquared RMSE
## 1 0.9471995 1.198156
# Model-averaged neural network tuned over weight decay and hidden-unit
# count. linout = TRUE requests a linear output unit (regression), and
# MaxNWts caps the weight count at what the largest grid size (10) needs.
# NOTE(review): no trControl is supplied, so caret's default bootstrap
# resampling (25 reps) is used here, unlike the 10-fold CV of the other
# models — confirm this was intentional.
set.seed(317)
nnetGrid <- expand.grid(.decay = c(0, 0.01, .1),
                        .size = c(1:10),
                        .bag = FALSE)
nnetfit <- train(
  x = trainingData$x,
  y = trainingData$y,
  method = "avNNet",
  tuneGrid = nnetGrid,
  preProcess = c("center", "scale"),
  linout = TRUE,
  trace = FALSE,
  MaxNWts = 10 * (ncol(trainingData$x) + 1) + 10 + 1,
  maxit = 500
)
## Warning: executing %dopar% sequentially: no parallel backend registered
nnetfit## Model Averaged Neural Network
##
## 200 samples
## 10 predictor
##
## Pre-processing: centered (10), scaled (10)
## Resampling: Bootstrapped (25 reps)
## Summary of sample sizes: 200, 200, 200, 200, 200, 200, ...
## Resampling results across tuning parameters:
##
## decay size RMSE Rsquared MAE
## 0.00 1 2.558074 0.7354749 2.018750
## 0.00 2 2.551324 0.7322727 2.006227
## 0.00 3 2.346114 0.7746725 1.844363
## 0.00 4 2.774983 0.7050149 2.101071
## 0.00 5 3.419444 0.6062167 2.415627
## 0.00 6 4.951063 0.4876851 3.177594
## 0.00 7 5.761377 0.4080602 3.761079
## 0.00 8 4.788191 0.4487625 3.173436
## 0.00 9 3.579533 0.6186456 2.583179
## 0.00 10 3.188433 0.6596291 2.358802
## 0.01 1 2.528201 0.7392076 1.977816
## 0.01 2 2.540150 0.7338716 2.007651
## 0.01 3 2.331119 0.7736264 1.837698
## 0.01 4 2.365476 0.7719779 1.865425
## 0.01 5 2.584746 0.7330244 2.031432
## 0.01 6 2.675065 0.7208631 2.135200
## 0.01 7 2.741729 0.7094062 2.182309
## 0.01 8 2.724735 0.7068107 2.131129
## 0.01 9 2.654345 0.7162791 2.140507
## 0.01 10 2.643604 0.7185392 2.106341
## 0.10 1 2.524669 0.7388254 1.975027
## 0.10 2 2.570081 0.7270239 2.014844
## 0.10 3 2.277826 0.7854825 1.801937
## 0.10 4 2.268553 0.7881324 1.809673
## 0.10 5 2.374965 0.7694193 1.883229
## 0.10 6 2.518906 0.7442645 1.988500
## 0.10 7 2.509753 0.7472883 1.995038
## 0.10 8 2.495911 0.7495486 1.971962
## 0.10 9 2.493696 0.7469856 1.982746
## 0.10 10 2.498591 0.7449700 1.991270
##
## Tuning parameter 'bag' was held constant at a value of FALSE
## RMSE was used to select the optimal model using the smallest value.
## The final values used for the model were size = 4, decay = 0.1 and bag = FALSE.
nnetfit$bestTune## size decay bag
## 24 4 0.1 FALSE
plot(nnetfit)
plot(varImp(nnetfit))
data.frame(Rsquared=nnetfit[["results"]][["Rsquared"]][as.numeric(rownames(nnetfit$bestTune))],
           RMSE=nnetfit[["results"]][["RMSE"]][as.numeric(rownames(nnetfit$bestTune))])
## Rsquared RMSE
## 1 0.7495486 2.495911
Which models appear to give the best performance? Does MARS select the informative predictors (those named X1–X5)?
# Score each tuned model on the 5000-case test set.
set.seed(317)
knn.pred <- predict(knnfit, newdata = testData$x)
svm.pred <- predict(svmfit, newdata = testData$x)
mars.pred <- predict(marsfit, newdata = testData$x)
nnet.pred <- predict(nnetfit, newdata = testData$x)
# Collect test-set RMSE, R-squared and MAE for the four models in one table.
data.frame(rbind(KNN=postResample(pred=knn.pred,obs = testData$y),
SVM=postResample(pred=svm.pred,obs = testData$y),
MARS=postResample(pred=mars.pred,obs = testData$y),
NNET=postResample(pred=nnet.pred,obs = testData$y)))## RMSE Rsquared MAE
## KNN 3.117232 0.6556622 2.489991
## SVM 2.073617 0.8256703 1.575110
## MARS 1.277999 0.9338365 1.014707
## NNET 2.162285 0.8168289 1.615305
Exercise 6.3 describes data for a chemical manufacturing process. Use the same data imputation, data splitting, and pre-processing steps as before and train several nonlinear regression models.
data(ChemicalManufacturingProcess)
glimpse(ChemicalManufacturingProcess)
## Rows: 176
## Columns: 58
## $ Yield <dbl> 38.00, 42.44, 42.03, 41.42, 42.49, 43.57, 43.12…
## $ BiologicalMaterial01 <dbl> 6.25, 8.01, 8.01, 8.01, 7.47, 6.12, 7.48, 6.94,…
## $ BiologicalMaterial02 <dbl> 49.58, 60.97, 60.97, 60.97, 63.33, 58.36, 64.47…
## $ BiologicalMaterial03 <dbl> 56.97, 67.48, 67.48, 67.48, 72.25, 65.31, 72.41…
## $ BiologicalMaterial04 <dbl> 12.74, 14.65, 14.65, 14.65, 14.02, 15.17, 13.82…
## $ BiologicalMaterial05 <dbl> 19.51, 19.36, 19.36, 19.36, 17.91, 21.79, 17.71…
## $ BiologicalMaterial06 <dbl> 43.73, 53.14, 53.14, 53.14, 54.66, 51.23, 54.45…
## $ BiologicalMaterial07 <dbl> 100, 100, 100, 100, 100, 100, 100, 100, 100, 10…
## $ BiologicalMaterial08 <dbl> 16.66, 19.04, 19.04, 19.04, 18.22, 18.30, 18.72…
## $ BiologicalMaterial09 <dbl> 11.44, 12.55, 12.55, 12.55, 12.80, 12.13, 12.95…
## $ BiologicalMaterial10 <dbl> 3.46, 3.46, 3.46, 3.46, 3.05, 3.78, 3.04, 3.85,…
## $ BiologicalMaterial11 <dbl> 138.09, 153.67, 153.67, 153.67, 147.61, 151.88,…
## $ BiologicalMaterial12 <dbl> 18.83, 21.05, 21.05, 21.05, 21.05, 20.76, 20.75…
## $ ManufacturingProcess01 <dbl> NA, 0.0, 0.0, 0.0, 10.7, 12.0, 11.5, 12.0, 12.0…
## $ ManufacturingProcess02 <dbl> NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0…
## $ ManufacturingProcess03 <dbl> NA, NA, NA, NA, NA, NA, 1.56, 1.55, 1.56, 1.55,…
## $ ManufacturingProcess04 <dbl> NA, 917, 912, 911, 918, 924, 933, 929, 928, 938…
## $ ManufacturingProcess05 <dbl> NA, 1032.2, 1003.6, 1014.6, 1027.5, 1016.8, 988…
## $ ManufacturingProcess06 <dbl> NA, 210.0, 207.1, 213.3, 205.7, 208.9, 210.0, 2…
## $ ManufacturingProcess07 <dbl> NA, 177, 178, 177, 178, 178, 177, 178, 177, 177…
## $ ManufacturingProcess08 <dbl> NA, 178, 178, 177, 178, 178, 178, 178, 177, 177…
## $ ManufacturingProcess09 <dbl> 43.00, 46.57, 45.07, 44.92, 44.96, 45.32, 49.36…
## $ ManufacturingProcess10 <dbl> NA, NA, NA, NA, NA, NA, 11.6, 10.2, 9.7, 10.1, …
## $ ManufacturingProcess11 <dbl> NA, NA, NA, NA, NA, NA, 11.5, 11.3, 11.1, 10.2,…
## $ ManufacturingProcess12 <dbl> NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0…
## $ ManufacturingProcess13 <dbl> 35.5, 34.0, 34.8, 34.8, 34.6, 34.0, 32.4, 33.6,…
## $ ManufacturingProcess14 <dbl> 4898, 4869, 4878, 4897, 4992, 4985, 4745, 4854,…
## $ ManufacturingProcess15 <dbl> 6108, 6095, 6087, 6102, 6233, 6222, 5999, 6105,…
## $ ManufacturingProcess16 <dbl> 4682, 4617, 4617, 4635, 4733, 4786, 4486, 4626,…
## $ ManufacturingProcess17 <dbl> 35.5, 34.0, 34.8, 34.8, 33.9, 33.4, 33.8, 33.6,…
## $ ManufacturingProcess18 <dbl> 4865, 4867, 4877, 4872, 4886, 4862, 4758, 4766,…
## $ ManufacturingProcess19 <dbl> 6049, 6097, 6078, 6073, 6102, 6115, 6013, 6022,…
## $ ManufacturingProcess20 <dbl> 4665, 4621, 4621, 4611, 4659, 4696, 4522, 4552,…
## $ ManufacturingProcess21 <dbl> 0.0, 0.0, 0.0, 0.0, -0.7, -0.6, 1.4, 0.0, 0.0, …
## $ ManufacturingProcess22 <dbl> NA, 3, 4, 5, 8, 9, 1, 2, 3, 4, 6, 7, 8, 10, 11,…
## $ ManufacturingProcess23 <dbl> NA, 0, 1, 2, 4, 1, 1, 2, 3, 1, 3, 4, 1, 2, 3, 4…
## $ ManufacturingProcess24 <dbl> NA, 3, 4, 5, 18, 1, 1, 2, 3, 4, 6, 7, 8, 2, 15,…
## $ ManufacturingProcess25 <dbl> 4873, 4869, 4897, 4892, 4930, 4871, 4795, 4806,…
## $ ManufacturingProcess26 <dbl> 6074, 6107, 6116, 6111, 6151, 6128, 6057, 6059,…
## $ ManufacturingProcess27 <dbl> 4685, 4630, 4637, 4630, 4684, 4687, 4572, 4586,…
## $ ManufacturingProcess28 <dbl> 10.7, 11.2, 11.1, 11.1, 11.3, 11.4, 11.2, 11.1,…
## $ ManufacturingProcess29 <dbl> 21.0, 21.4, 21.3, 21.3, 21.6, 21.7, 21.2, 21.2,…
## $ ManufacturingProcess30 <dbl> 9.9, 9.9, 9.4, 9.4, 9.0, 10.1, 11.2, 10.9, 10.5…
## $ ManufacturingProcess31 <dbl> 69.1, 68.7, 69.3, 69.3, 69.4, 68.2, 67.6, 67.9,…
## $ ManufacturingProcess32 <dbl> 156, 169, 173, 171, 171, 173, 159, 161, 160, 16…
## $ ManufacturingProcess33 <dbl> 66, 66, 66, 68, 70, 70, 65, 65, 65, 66, 67, 67,…
## $ ManufacturingProcess34 <dbl> 2.4, 2.6, 2.6, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.…
## $ ManufacturingProcess35 <dbl> 486, 508, 509, 496, 468, 490, 475, 478, 491, 48…
## $ ManufacturingProcess36 <dbl> 0.019, 0.019, 0.018, 0.018, 0.017, 0.018, 0.019…
## $ ManufacturingProcess37 <dbl> 0.5, 2.0, 0.7, 1.2, 0.2, 0.4, 0.8, 1.0, 1.2, 1.…
## $ ManufacturingProcess38 <dbl> 3, 2, 2, 2, 2, 2, 2, 2, 3, 3, 2, 3, 3, 3, 3, 3,…
## $ ManufacturingProcess39 <dbl> 7.2, 7.2, 7.2, 7.2, 7.3, 7.2, 7.3, 7.3, 7.4, 7.…
## $ ManufacturingProcess40 <dbl> NA, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0…
## $ ManufacturingProcess41 <dbl> NA, 0.15, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0…
## $ ManufacturingProcess42 <dbl> 11.6, 11.1, 12.0, 10.6, 11.0, 11.5, 11.7, 11.4,…
## $ ManufacturingProcess43 <dbl> 3.0, 0.9, 1.0, 1.1, 1.1, 2.2, 0.7, 0.8, 0.9, 0.…
## $ ManufacturingProcess44 <dbl> 1.8, 1.9, 1.8, 1.8, 1.7, 1.8, 2.0, 2.0, 1.9, 1.…
## $ ManufacturingProcess45 <dbl> 2.4, 2.2, 2.3, 2.1, 2.1, 2.0, 2.2, 2.2, 2.1, 2.…
The matrix processPredictors contains the 57 predictors (12 describing the input biological material and 45 describing the process predictors) for the 176 manufacturing runs. yield contains the percent yield for each run.
We will first identify all the variables that have any missing values. We use the complete.cases() function below to find the missing values.
# columns having missing values
colnames(ChemicalManufacturingProcess)[!complete.cases(t(ChemicalManufacturingProcess))]## [1] "ManufacturingProcess01" "ManufacturingProcess02" "ManufacturingProcess03"
## [4] "ManufacturingProcess04" "ManufacturingProcess05" "ManufacturingProcess06"
## [7] "ManufacturingProcess07" "ManufacturingProcess08" "ManufacturingProcess10"
## [10] "ManufacturingProcess11" "ManufacturingProcess12" "ManufacturingProcess14"
## [13] "ManufacturingProcess22" "ManufacturingProcess23" "ManufacturingProcess24"
## [16] "ManufacturingProcess25" "ManufacturingProcess26" "ManufacturingProcess27"
## [19] "ManufacturingProcess28" "ManufacturingProcess29" "ManufacturingProcess30"
## [22] "ManufacturingProcess31" "ManufacturingProcess33" "ManufacturingProcess34"
## [25] "ManufacturingProcess35" "ManufacturingProcess36" "ManufacturingProcess40"
## [28] "ManufacturingProcess41"
So there are 28 columns having missing values. Here is the plot for missing values of all the predictors.
gg_miss_var(ChemicalManufacturingProcess[,-c(1)]) + labs(y = "Sorted by Missing values")

We will next use the preProcess() method to impute the missing values using knnImpute (k-nearest neighbors).
# Impute missing values in the 57 predictors (column 1, Yield, is dropped)
# via k-nearest-neighbors imputation.
# NOTE(review): caret's knnImpute also centers/scales the predictors as part
# of the distance computation — confirm against the caret documentation.
pre.proc <- preProcess(ChemicalManufacturingProcess[,c(-1)], method = "knnImpute")
chem_df <- predict(pre.proc, ChemicalManufacturingProcess[,c(-1)])# columns having missing values
# Verify no columns with missing values remain after imputation.
colnames(chem_df)[!complete.cases(t(chem_df))]## character(0)
We will first filter out the predictors that have low frequencies using the nearZeroVar function from the caret package. After applying this function we see 1 column is removed and 56 predictors are left for modeling.
# Remove near-zero-variance predictors (per the output: 1 column removed,
# 56 predictors remain).
chem.remove.pred <- nearZeroVar(chem_df)
chem_df <- chem_df[,-chem.remove.pred]
length(chem.remove.pred) %>% paste('columns are removed. ', dim(chem_df)[2], ' predictors are left for modeling.') %>% print()## [1] "1 columns are removed. 56 predictors are left for modeling."
We will now look into pairwise correlation above 0.90 and remove the predictors having correlation with cutoff 0.90.
# Drop one predictor of each highly correlated pair (|r| >= 0.90); per the
# output this removes 10 columns, leaving 46 predictors.
chem.corr.90 <- findCorrelation(cor(chem_df), cutoff=0.90)
chem_df <- chem_df[,-chem.corr.90]
length(chem.corr.90) %>% paste('columns having correlation 0.90 or more are removed. ', dim(chem_df)[2], ' predictors are left for modeling.') %>% print()## [1] "10 columns having correlation 0.90 or more are removed. 46 predictors are left for modeling."
Next step is to split the data in training and testing set. We reserve 70% for training and 30% for testing. After split we will fit elastic net model.
set.seed(786)
# Center and scale the filtered predictors.
# NOTE(review): imputation (above) and scaling (here) are fit on the FULL
# data set before the train/test split, which leaks test-set information
# into the transformations — consider fitting preProcess on the training
# partition only and applying it to the test partition.
pre.proc <- preProcess(chem_df, method = c("center", "scale"))
chem_df <- predict(pre.proc, chem_df)
# partition: 70% training / 30% testing, stratified on Yield
chem.part <- createDataPartition(ChemicalManufacturingProcess$Yield, p=0.70, list = FALSE)
# predictor
X.train <- chem_df[chem.part,]
X.test <- chem_df[-chem.part,]
# response
y.train <- ChemicalManufacturingProcess$Yield[chem.part]
y.test <- ChemicalManufacturingProcess$Yield[-chem.part]

Which nonlinear regression model gives the optimal resampling and test set performance?
# k-NN regression on the chemical manufacturing training set: k tuned over
# 20 candidate values with 10-fold cross-validation on centered/scaled
# predictors.
set.seed(317)
knnmodel <- train(
  x = X.train,
  y = y.train,
  method = "knn",
  preProcess = c("center", "scale"),
  tuneLength = 20,
  trControl = trainControl(method = "cv")
)
knnmodel## k-Nearest Neighbors
##
## 124 samples
## 46 predictor
##
## Pre-processing: centered (46), scaled (46)
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 112, 111, 111, 112, 112, 111, ...
## Resampling results across tuning parameters:
##
## k RMSE Rsquared MAE
## 5 1.230194 0.5749894 1.010536
## 7 1.257501 0.5520189 1.038355
## 9 1.264397 0.5384761 1.054920
## 11 1.269283 0.5473221 1.061149
## 13 1.305317 0.5191095 1.084605
## 15 1.285998 0.5382404 1.066163
## 17 1.282823 0.5564207 1.043242
## 19 1.311133 0.5469702 1.072207
## 21 1.334130 0.5360523 1.096845
## 23 1.353056 0.5327438 1.103798
## 25 1.368999 0.5220098 1.118172
## 27 1.386940 0.5118155 1.127164
## 29 1.397704 0.5119520 1.139086
## 31 1.411283 0.5005009 1.149232
## 33 1.420179 0.5032183 1.161620
## 35 1.442249 0.4851713 1.183681
## 37 1.459469 0.4786532 1.199635
## 39 1.461236 0.4840176 1.199649
## 41 1.461482 0.4948347 1.198766
## 43 1.476368 0.4958790 1.208275
##
## RMSE was used to select the optimal model using the smallest value.
## The final value used for the model was k = 5.
knnmodel$bestTune## k
## 1 5
plot(knnmodel)
plot(varImp(knnmodel), top = 20)
data.frame(Rsquared=knnmodel[["results"]][["Rsquared"]][as.numeric(rownames(knnmodel$bestTune))],
           RMSE=knnmodel[["results"]][["RMSE"]][as.numeric(rownames(knnmodel$bestTune))])
## Rsquared RMSE
## 1 0.5749894 1.230194
# Radial-basis SVM on the chemical manufacturing training set: cost C tuned
# over 20 values (sigma estimated analytically), 10-fold cross-validation,
# centered/scaled predictors.
set.seed(317)
svmmodel <- train(
  x = X.train,
  y = y.train,
  method = "svmRadial",
  preProcess = c("center", "scale"),
  tuneLength = 20,
  trControl = trainControl(method = "cv")
)
svmmodel## Support Vector Machines with Radial Basis Function Kernel
##
## 124 samples
## 46 predictor
##
## Pre-processing: centered (46), scaled (46)
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 112, 111, 111, 112, 112, 111, ...
## Resampling results across tuning parameters:
##
## C RMSE Rsquared MAE
## 0.25 1.369324 0.5127278 1.1303294
## 0.50 1.236519 0.5616219 1.0237851
## 1.00 1.151876 0.6020830 0.9548845
## 2.00 1.122393 0.6208883 0.9268773
## 4.00 1.100222 0.6324424 0.9033605
## 8.00 1.096925 0.6353849 0.9005422
## 16.00 1.096925 0.6353849 0.9005422
## 32.00 1.096925 0.6353849 0.9005422
## 64.00 1.096925 0.6353849 0.9005422
## 128.00 1.096925 0.6353849 0.9005422
## 256.00 1.096925 0.6353849 0.9005422
## 512.00 1.096925 0.6353849 0.9005422
## 1024.00 1.096925 0.6353849 0.9005422
## 2048.00 1.096925 0.6353849 0.9005422
## 4096.00 1.096925 0.6353849 0.9005422
## 8192.00 1.096925 0.6353849 0.9005422
## 16384.00 1.096925 0.6353849 0.9005422
## 32768.00 1.096925 0.6353849 0.9005422
## 65536.00 1.096925 0.6353849 0.9005422
## 131072.00 1.096925 0.6353849 0.9005422
##
## Tuning parameter 'sigma' was held constant at a value of 0.01640464
## RMSE was used to select the optimal model using the smallest value.
## The final values used for the model were sigma = 0.01640464 and C = 8.
svmmodel$finalModel## Support Vector Machine object of class "ksvm"
##
## SV type: eps-svr (regression)
## parameter : epsilon = 0.1 cost C = 8
##
## Gaussian Radial Basis kernel function.
## Hyperparameter : sigma = 0.0164046423594749
##
## Number of Support Vectors : 103
##
## Objective Function Value : -62.3255
## Training error : 0.009029
plot(svmmodel)
plot(varImp(svmmodel), top = 20)
data.frame(Rsquared=svmmodel[["results"]][["Rsquared"]][as.numeric(rownames(svmmodel$bestTune))],
           RMSE=svmmodel[["results"]][["RMSE"]][as.numeric(rownames(svmmodel$bestTune))])
## Rsquared RMSE
## 1 0.6353849 1.096925
# MARS (earth) model on the chemical manufacturing training set, tuned over
# interaction degree (1-2) and number of retained terms (2-38) with 10-fold
# cross-validation.
set.seed(317)
marsGrid2 <- expand.grid(.degree = 1:2, .nprune = 2:38)
marsmodel <- train(
  x = X.train,
  y = y.train,
  method = "earth",
  preProcess = c("center", "scale"),
  tuneGrid = marsGrid2,
  trControl = trainControl(method = "cv")
)
marsmodel## Multivariate Adaptive Regression Spline
##
## 124 samples
## 46 predictor
##
## Pre-processing: centered (46), scaled (46)
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 112, 111, 111, 112, 112, 111, ...
## Resampling results across tuning parameters:
##
## degree nprune RMSE Rsquared MAE
## 1 2 1.378837 0.4584183 1.0980517
## 1 3 1.215122 0.5642999 1.0073364
## 1 4 1.132904 0.6658680 0.9555981
## 1 5 1.100345 0.6619267 0.8980037
## 1 6 1.121527 0.6502298 0.9145325
## 1 7 1.162944 0.6307487 0.9280849
## 1 8 1.219283 0.5962345 0.9606618
## 1 9 1.194364 0.6054373 0.9440048
## 1 10 1.193342 0.6147445 0.9458594
## 1 11 1.214334 0.6146688 0.9761178
## 1 12 1.217438 0.6142425 1.0065333
## 1 13 1.232298 0.5827853 1.0105371
## 1 14 1.236477 0.5751071 1.0056807
## 1 15 1.264532 0.5642905 1.0272468
## 1 16 1.257305 0.5713154 1.0189940
## 1 17 1.269678 0.5760776 1.0193982
## 1 18 1.306111 0.5654149 1.0477744
## 1 19 1.308253 0.5631224 1.0487485
## 1 20 1.303777 0.5686184 1.0486274
## 1 21 1.313125 0.5672977 1.0587722
## 1 22 1.308701 0.5687871 1.0511736
## 1 23 1.317595 0.5638929 1.0553986
## 1 24 1.317595 0.5638929 1.0553986
## 1 25 1.317595 0.5638929 1.0553986
## 1 26 1.317595 0.5638929 1.0553986
## 1 27 1.317595 0.5638929 1.0553986
## 1 28 1.317595 0.5638929 1.0553986
## 1 29 1.317595 0.5638929 1.0553986
## 1 30 1.317595 0.5638929 1.0553986
## 1 31 1.317595 0.5638929 1.0553986
## 1 32 1.317595 0.5638929 1.0553986
## 1 33 1.317595 0.5638929 1.0553986
## 1 34 1.317595 0.5638929 1.0553986
## 1 35 1.317595 0.5638929 1.0553986
## 1 36 1.317595 0.5638929 1.0553986
## 1 37 1.317595 0.5638929 1.0553986
## 1 38 1.317595 0.5638929 1.0553986
## 2 2 1.378837 0.4584183 1.0980517
## 2 3 1.252083 0.5507787 1.0220395
## 2 4 1.164533 0.6144538 0.9692243
## 2 5 1.085452 0.6567089 0.8969018
## 2 6 1.104009 0.6608401 0.8919666
## 2 7 1.123238 0.6558716 0.9097128
## 2 8 2.670801 0.5355185 1.4352452
## 2 9 2.839057 0.5279451 1.5150140
## 2 10 2.840662 0.5482219 1.5134498
## 2 11 2.875029 0.5303081 1.5524105
## 2 12 2.928193 0.5229863 1.5594343
## 2 13 2.916106 0.5396675 1.5603137
## 2 14 2.931491 0.5225589 1.5655010
## 2 15 2.913543 0.5219537 1.5397073
## 2 16 2.926874 0.5166287 1.5490249
## 2 17 2.920114 0.5179998 1.5364330
## 2 18 2.952421 0.4930226 1.5336970
## 2 19 2.963624 0.4906010 1.5430494
## 2 20 2.948643 0.5006846 1.5365215
## 2 21 2.948643 0.5006846 1.5365215
## 2 22 2.899959 0.5212188 1.4997058
## 2 23 2.899959 0.5212188 1.4997058
## 2 24 2.968269 0.4976663 1.5719917
## 2 25 2.968269 0.4976663 1.5719917
## 2 26 2.963379 0.5015659 1.5550592
## 2 27 2.963379 0.5015659 1.5550592
## 2 28 2.963379 0.5015659 1.5550592
## 2 29 2.963379 0.5015659 1.5550592
## 2 30 2.963379 0.5015659 1.5550592
## 2 31 2.963379 0.5015659 1.5550592
## 2 32 2.963379 0.5015659 1.5550592
## 2 33 2.963379 0.5015659 1.5550592
## 2 34 2.963379 0.5015659 1.5550592
## 2 35 2.963379 0.5015659 1.5550592
## 2 36 2.963379 0.5015659 1.5550592
## 2 37 2.963379 0.5015659 1.5550592
## 2 38 2.963379 0.5015659 1.5550592
##
## RMSE was used to select the optimal model using the smallest value.
## The final values used for the model were nprune = 5 and degree = 2.
marsmodel$bestTune## nprune degree
## 41 5 2
plot(marsmodel)
plot(varImp(marsmodel), top=20)
data.frame(Rsquared=marsmodel[["results"]][["Rsquared"]][as.numeric(rownames(marsmodel$bestTune))],
           RMSE=marsmodel[["results"]][["RMSE"]][as.numeric(rownames(marsmodel$bestTune))])
## Rsquared RMSE
## 1 0.5687871 1.308701
# Model-averaged neural network on the chemical manufacturing training set,
# tuned over weight decay and hidden-unit count with 10-fold CV.
set.seed(317)
nnetGrid2 <- expand.grid(.decay=c(0,0.01,.1),
                         .size=c(1:10),
                         .bag=FALSE)
# BUG FIX: MaxNWts was 5 * (ncol(X.train) + 1) + 5 + 1, which only permits
# networks up to size = 5. nnet therefore refused to fit sizes 6-10, and the
# resampling results for those grid rows came back NaN (see the output
# table). Size the weight cap from the largest hidden-layer size actually in
# the tuning grid so every candidate can be fit.
max.size <- max(nnetGrid2$.size)
nnetmodel <- train(X.train,
                   y.train,
                   method = "avNNet",
                   tuneGrid = nnetGrid2,
                   preProcess = c("center","scale"),
                   trControl = trainControl(method = "cv"),
                   linout = TRUE,          # linear output unit (regression)
                   trace = FALSE,          # suppress per-iteration printing
                   MaxNWts = max.size * (ncol(X.train) + 1) + max.size + 1,
                   maxit = 500)
nnetmodel## Model Averaged Neural Network
##
## 124 samples
## 46 predictor
##
## Pre-processing: centered (46), scaled (46)
## Resampling: Cross-Validated (10 fold)
## Summary of sample sizes: 112, 111, 111, 112, 112, 111, ...
## Resampling results across tuning parameters:
##
## decay size RMSE Rsquared MAE
## 0.00 1 2.075882 0.4277074 1.500239
## 0.00 2 1.406932 0.4283374 1.174347
## 0.00 3 1.430712 0.4835736 1.152561
## 0.00 4 1.765865 0.3521800 1.384191
## 0.00 5 2.029459 0.3238311 1.626526
## 0.00 6 NaN NaN NaN
## 0.00 7 NaN NaN NaN
## 0.00 8 NaN NaN NaN
## 0.00 9 NaN NaN NaN
## 0.00 10 NaN NaN NaN
## 0.01 1 1.325517 0.5110206 1.043410
## 0.01 2 1.400170 0.4889720 1.136708
## 0.01 3 1.608547 0.4414097 1.314204
## 0.01 4 1.631617 0.4228275 1.334189
## 0.01 5 1.795388 0.3852335 1.438051
## 0.01 6 NaN NaN NaN
## 0.01 7 NaN NaN NaN
## 0.01 8 NaN NaN NaN
## 0.01 9 NaN NaN NaN
## 0.01 10 NaN NaN NaN
## 0.10 1 1.325736 0.5387753 1.062091
## 0.10 2 1.622384 0.4484708 1.296748
## 0.10 3 2.080065 0.3662906 1.502618
## 0.10 4 1.957739 0.3884348 1.485283
## 0.10 5 1.858158 0.4081770 1.396380
## 0.10 6 NaN NaN NaN
## 0.10 7 NaN NaN NaN
## 0.10 8 NaN NaN NaN
## 0.10 9 NaN NaN NaN
## 0.10 10 NaN NaN NaN
##
## Tuning parameter 'bag' was held constant at a value of FALSE
## RMSE was used to select the optimal model using the smallest value.
## The final values used for the model were size = 1, decay = 0.01 and bag = FALSE.
nnetmodel$bestTune## size decay bag
## 11 1 0.01 FALSE
plot(nnetmodel)
plot(varImp(nnetmodel), top=20)
data.frame(Rsquared=nnetmodel[["results"]][["Rsquared"]][as.numeric(rownames(nnetmodel$bestTune))],
           RMSE=nnetmodel[["results"]][["RMSE"]][as.numeric(rownames(nnetmodel$bestTune))])
## Rsquared RMSE
## 1 0.4228275 1.631617
set.seed(317)
summary(resamples(list(KNN=knnmodel, SVM=svmmodel, MARS=marsmodel, NNET=nnetmodel)))##
## Call:
## summary.resamples(object = resamples(list(KNN = knnmodel, SVM = svmmodel,
## MARS = marsmodel, NNET = nnetmodel)))
##
## Models: KNN, SVM, MARS, NNET
## Number of resamples: 10
##
## MAE
## Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
## KNN 0.7253333 0.8340208 1.0118974 1.0105361 1.0768942 1.568462 0
## SVM 0.4747285 0.7813261 0.8380018 0.9005422 0.9214638 1.424338 0
## MARS 0.5901402 0.7851895 0.9280215 0.8969018 0.9954994 1.103420 0
## NNET 0.5782253 0.8497158 0.9384080 1.0434099 1.2273551 1.603322 0
##
## RMSE
## Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
## KNN 0.8065920 1.0702274 1.202903 1.230194 1.299813 1.908142 0
## SVM 0.6420855 0.9567874 1.070993 1.096925 1.105920 1.628523 0
## MARS 0.8017703 0.9937101 1.110663 1.085452 1.192513 1.303691 0
## NNET 0.7580223 1.1050175 1.285103 1.325517 1.463984 2.022841 0
##
## Rsquared
## Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
## KNN 0.2983353 0.4877591 0.5755962 0.5749894 0.6968237 0.7425779 0
## SVM 0.3270323 0.5357866 0.6169087 0.6353849 0.7338580 0.9203906 0
## MARS 0.2654970 0.5693497 0.6841259 0.6567089 0.7773202 0.8850418 0
## NNET 0.2943007 0.3625471 0.5300712 0.5110206 0.6014718 0.8303974 0
# Score each tuned model on the 30% hold-out test partition.
set.seed(317)
knnpred <- predict(knnmodel, newdata = X.test)
svmpred <- predict(svmmodel, newdata = X.test)
marspred <- predict(marsmodel, newdata = X.test)
nnetpred <- predict(nnetmodel, newdata = X.test)
# Collect test-set RMSE, R-squared and MAE for the four models in one table.
data.frame(rbind(KNN=postResample(pred=knnpred,obs = y.test),
SVM=postResample(pred=svmpred,obs = y.test),
MARS=postResample(pred=marspred,obs = y.test),
NNET=postResample(pred=nnetpred,obs = y.test)))## RMSE Rsquared MAE
## KNN 1.457193 0.4276234 1.148500
## SVM 1.359472 0.4868642 1.164737
## MARS 1.333481 0.4934157 1.016152
## NNET 1.400185 0.4550918 1.087330
Which predictors are most important in the optimal nonlinear regression model? Do either the biological or process variables dominate the list? How do the top ten important predictors compare to the top ten predictors from the optimal linear model?
Explore the relationships between the top predictors and the response for the predictors that are unique to the optimal nonlinear regression model. Do these plots reveal intuition about the biological or process predictors and their relationship with yield?